From 8a9db38089f0e071fed513d562c6f3cd3bad02a6 Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Fri, 29 Jul 2005 10:31:22 +0000 Subject: [PATCH] Attached patch adds a DMA zone to xen, also modifies xen_contig_memory() to ask for DMA pages. Signed-off-by: srparish@us.ibm.com --- .../arch/xen/i386/mm/hypervisor.c | 2 +- xen/arch/x86/domain_build.c | 2 +- xen/arch/x86/x86_32/mm.c | 2 +- xen/arch/x86/x86_64/mm.c | 2 +- xen/common/dom_mem_ops.c | 14 ++++-- xen/common/page_alloc.c | 44 +++++++++++++------ xen/include/xen/mm.h | 7 ++- 7 files changed, 51 insertions(+), 22 deletions(-) diff --git a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c index 0e19e846fd..7a9e73fa88 100644 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c @@ -296,7 +296,7 @@ void xen_contig_memory(unsigned long vstart, unsigned int order) /* 2. Get a new contiguous memory extent. */ BUG_ON(HYPERVISOR_dom_mem_op( - MEMOP_increase_reservation, &mfn, 1, order) != 1); + MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1); /* 3. Map the new extent in place of old pages. 
*/ for (i = 0; i < (1<> L2_PAGETABLE_SHIFT); i++ ) { - if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL ) + if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL ) panic("Not enough memory to bootstrap Xen.\n"); idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] = l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE); diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index ae3dac0b6b..438bff4754 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -100,7 +100,7 @@ void __init paging_init(void) */ for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) ) { - pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER); + pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0); if ( pg == NULL ) panic("Not enough memory for m2p table\n"); map_pages_to_xen( diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c index ad53b91d1c..e3a459ff9c 100644 --- a/xen/common/dom_mem_ops.c +++ b/xen/common/dom_mem_ops.c @@ -37,7 +37,8 @@ alloc_dom_mem(struct domain *d, unsigned long *extent_list, unsigned long start_extent, unsigned int nr_extents, - unsigned int extent_order) + unsigned int extent_order, + unsigned int flags) { struct pfn_info *page; unsigned long i; @@ -56,7 +57,8 @@ alloc_dom_mem(struct domain *d, { PREEMPT_CHECK(MEMOP_increase_reservation); - if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) ) + if ( unlikely((page = alloc_domheap_pages(d, extent_order, + flags)) == NULL) ) { DPRINTK("Could not allocate a frame\n"); return i; @@ -131,11 +133,16 @@ do_dom_mem_op(unsigned long op, { struct domain *d; unsigned long rc, start_extent; + unsigned int address_bits_order; /* Extract @start_extent from @op. 
*/ start_extent = op >> START_EXTENT_SHIFT; op &= (1 << START_EXTENT_SHIFT) - 1; + /* separate extent_order and address_bits_order */ + address_bits_order = (extent_order >> 8) & 0xff; + extent_order &= 0xff; + if ( unlikely(start_extent > nr_extents) ) return -EINVAL; @@ -150,7 +157,8 @@ do_dom_mem_op(unsigned long op, { case MEMOP_increase_reservation: rc = alloc_dom_mem( - d, extent_list, start_extent, nr_extents, extent_order); + d, extent_list, start_extent, nr_extents, extent_order, + (address_bits_order <= 32) ? ALLOC_DOM_DMA : 0); break; case MEMOP_decrease_reservation: rc = free_dom_mem( diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index e8c70d63b7..920987bf32 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -207,7 +207,13 @@ unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align) #define MEMZONE_XEN 0 #define MEMZONE_DOM 1 -#define NR_ZONES 2 +#define MEMZONE_DMADOM 2 +#define NR_ZONES 3 + + +#define MAX_DMADOM_PFN 0xFFFFF +#define pfn_dom_zone_type(_pfn) \ (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM) /* Up to 2^20 pages can be allocated at once. 
*/ #define MAX_ORDER 20 @@ -236,7 +242,7 @@ void end_boot_allocator(void) if ( next_free ) map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */ if ( curr_free ) - free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0); + free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0); } } @@ -474,14 +480,21 @@ void init_domheap_pages(physaddr_t ps, physaddr_t pe) { ASSERT(!in_irq()); - ps = round_pgup(ps); - pe = round_pgdown(pe); + ps = round_pgup(ps) >> PAGE_SHIFT; + pe = round_pgdown(pe) >> PAGE_SHIFT; - init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT); + if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) { + init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps); + init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), + pe - MAX_DMADOM_PFN); + } + else + init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps); } -struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order) +struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order, + unsigned int flags) { struct pfn_info *pg; cpumask_t mask; @@ -489,8 +502,13 @@ struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order) ASSERT(!in_irq()); - if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) ) - return NULL; + pg = NULL; + if (! 
(flags & ALLOC_DOM_DMA)) + pg = alloc_heap_pages(MEMZONE_DOM, order); + if (pg == NULL) { + if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) ) + return NULL; + } mask = pg->u.free.cpumask; tlbflush_filter(mask, pg->tlbflush_timestamp); @@ -531,7 +549,7 @@ struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order) DPRINTK("...or the domain is dying (%d)\n", !!test_bit(_DOMF_dying, &d->domain_flags)); spin_unlock(&d->page_alloc_lock); - free_heap_pages(MEMZONE_DOM, pg, order); + free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order); return NULL; } @@ -596,7 +614,7 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order) if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) ) { - free_heap_pages(MEMZONE_DOM, pg, order); + free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order); } else { @@ -616,7 +634,7 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order) else { /* Freeing an anonymous domain-heap page. */ - free_heap_pages(MEMZONE_DOM, pg, order); + free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order); drop_dom_ref = 0; } @@ -627,7 +645,7 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order) unsigned long avail_domheap_pages(void) { - return avail[MEMZONE_DOM]; + return avail[MEMZONE_DOM] + avail[MEMZONE_DMADOM]; } @@ -676,7 +694,7 @@ static void page_scrub_softirq(void) p = map_domain_page(page_to_pfn(pg)); clear_page(p); unmap_domain_page(p); - free_heap_pages(MEMZONE_DOM, pg, 0); + free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0); } } while ( (NOW() - start) < MILLISECS(1) ); } diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index 1919b5e9e7..d3d7a21422 100644 --- a/xen/include/xen/mm.h +++ b/xen/include/xen/mm.h @@ -33,12 +33,15 @@ void free_xenheap_pages(void *v, unsigned int order); /* Domain suballocator. 
These functions are *not* interrupt-safe.*/ void init_domheap_pages(physaddr_t ps, physaddr_t pe); -struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order); +struct pfn_info *alloc_domheap_pages( + struct domain *d, unsigned int order, unsigned int flags); void free_domheap_pages(struct pfn_info *pg, unsigned int order); unsigned long avail_domheap_pages(void); -#define alloc_domheap_page(d) (alloc_domheap_pages(d,0)) +#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0)) #define free_domheap_page(p) (free_domheap_pages(p,0)) +#define ALLOC_DOM_DMA 1 + /* Automatic page scrubbing for dead domains. */ extern struct list_head page_scrub_list; #define page_scrub_schedule_work() \ -- 2.30.2